vfparch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
if ( vfparch < 2 )
- panic("Xen only support VFP 3\n");
+ panic("Xen only supports VFP 3");
return 0;
}
asmlinkage void do_trap_serror(struct cpu_user_regs *regs)
{
- panic("Unhandled serror trap\n");
+ panic("Unhandled serror trap");
}
static const char *handler[]= {
}
if ( !pg )
- panic("Failed to allocate contiguous memory for dom0\n");
+ panic("Failed to allocate contiguous memory for dom0");
spfn = page_to_mfn(pg);
start = pfn_to_paddr(spfn);
res = guest_physmap_add_page(d, spfn, spfn, order);
if ( res )
- panic("Unable to add pages in DOM0: %d\n", res);
+ panic("Unable to add pages in DOM0: %d", res);
kinfo->mem.bank[0].start = start;
kinfo->mem.bank[0].size = size;
reg = dt_get_property(memory, "reg", ®_len);
if ( reg == NULL )
- panic("Memory node has no reg property!\n");
+ panic("Memory node has no reg property");
for ( l = 0;
kinfo->unassigned_mem > 0 && l + reg_size <= reg_len
paddr_t start, size;
if ( dt_device_get_address(memory, bank, &start, &size) )
- panic("Unable to retrieve the bank %u for %s\n",
+ panic("Unable to retrieve the bank %u for %s",
bank, dt_node_full_name(memory));
if ( size > kinfo->unassigned_mem )
printk("Populate P2M %#"PRIx64"->%#"PRIx64"\n",
start, start + size);
if ( p2m_populate_ram(d, start, start + size) < 0 )
- panic("Failed to populate P2M\n");
+ panic("Failed to populate P2M");
kinfo->mem.bank[kinfo->mem.nr_banks].start = start;
kinfo->mem.bank[kinfo->mem.nr_banks].size = size;
kinfo->mem.nr_banks++;
rc = raw_copy_to_guest_flush_dcache(dtb_virt, kinfo->fdt,
fdt_totalsize(kinfo->fdt));
if ( rc != 0 )
- panic("Unable to copy the DTB to dom0 memory (rc = %lu)\n", rc);
+ panic("Unable to copy the DTB to dom0 memory (rc = %lu)", rc);
xfree(kinfo->fdt);
}
res = fdt_setprop_inplace_cell(kinfo->fdt, node, "linux,initrd-start",
load_addr);
if ( res )
- panic("Cannot fix up \"linux,initrd-start\" property\n");
+ panic("Cannot fix up \"linux,initrd-start\" property");
res = fdt_setprop_inplace_cell(kinfo->fdt, node, "linux,initrd-end",
load_addr + len);
if ( res )
- panic("Cannot fix up \"linux,initrd-end\" property\n");
+ panic("Cannot fix up \"linux,initrd-end\" property");
for ( offs = 0; offs < len; )
{
rc = gvirt_to_maddr(load_addr + offs, &ma);
if ( rc )
{
- panic("\nUnable to translate guest address\n");
+ panic("Unable to translate guest address");
return;
}
early_vprintk(fmt, args);
va_end(args);
- early_printk("\nEarly Panic: Stopping\n");
+ early_printk("\n\nEarly Panic: Stopping\n");
while(1);
}
node = dt_find_interrupt_controller(gic_ids);
if ( !node )
- panic("Unable to find compatible GIC in the device tree\n");
+ panic("Unable to find compatible GIC in the device tree");
dt_device_set_used_by(node, DOMID_XEN);
res = dt_device_get_address(node, 0, &gic.dbase, NULL);
if ( res || !gic.dbase || (gic.dbase & ~PAGE_MASK) )
- panic("GIC: Cannot find a valid address for the distributor\n");
+ panic("GIC: Cannot find a valid address for the distributor");
res = dt_device_get_address(node, 1, &gic.cbase, NULL);
if ( res || !gic.cbase || (gic.cbase & ~PAGE_MASK) )
- panic("GIC: Cannot find a valid address for the CPU\n");
+ panic("GIC: Cannot find a valid address for the CPU");
res = dt_device_get_address(node, 2, &gic.hbase, NULL);
if ( res || !gic.hbase || (gic.hbase & ~PAGE_MASK) )
- panic("GIC: Cannot find a valid address for the hypervisor\n");
+ panic("GIC: Cannot find a valid address for the hypervisor");
res = dt_device_get_address(node, 3, &gic.vbase, NULL);
if ( res || !gic.vbase || (gic.vbase & ~PAGE_MASK) )
- panic("GIC: Cannot find a valid address for the virtual CPU\n");
+ panic("GIC: Cannot find a valid address for the virtual CPU");
res = dt_device_get_irq(node, 0, &gic.maintenance);
if ( res )
- panic("GIC: Cannot find the maintenance IRQ\n");
+ panic("GIC: Cannot find the maintenance IRQ");
/* Set the GIC as the primary interrupt controller */
dt_interrupt_controller = node;
if ( (gic.dbase & ~PAGE_MASK) || (gic.cbase & ~PAGE_MASK) ||
(gic.hbase & ~PAGE_MASK) || (gic.vbase & ~PAGE_MASK) )
- panic("GIC interfaces not page aligned.\n");
+ panic("GIC interfaces not page aligned");
set_fixmap(FIXMAP_GICD, gic.dbase >> PAGE_SHIFT, DEV_SHARED);
BUILD_BUG_ON(FIXMAP_ADDR(FIXMAP_GICC1) !=
smp_call_function_interrupt();
break;
default:
- panic("Unhandled SGI %d on CPU%d\n", sgi, smp_processor_id());
+ panic("Unhandled SGI %d on CPU%d", sgi, smp_processor_id());
break;
}
paddr_t addr;
if ( total + kernel_size > mem_size )
- panic("Not enough memory in the first bank for the dtb+initrd.");
+ panic("Not enough memory in the first bank for the dtb+initrd");
/*
* DTB must be loaded such that it does not conflict with the
addr = kernel_start - total;
else
{
- panic("Unable to find suitable location for dtb+initrd.");
+ panic("Unable to find suitable location for dtb+initrd");
return;
}
rc = gvirt_to_maddr(load_addr + offs, &ma);
if ( rc )
{
- panic("\nUnable to map translate guest address\n");
+ panic("Unable to translate guest address");
return;
}
info->kernel_order = get_order_from_bytes(size);
info->kernel_img = alloc_xenheap_pages(info->kernel_order, 0);
if ( info->kernel_img == NULL )
- panic("Cannot allocate temporary buffer for kernel.\n");
+ panic("Cannot allocate temporary buffer for kernel");
copy_from_paddr(info->kernel_img, addr, size, info->load_attr);
xenheap_mfn_start = base_mfn;
if ( base_mfn < xenheap_mfn_start )
- early_panic("cannot add xenheap mapping at %lx below heap start %lx\n",
+ early_panic("cannot add xenheap mapping at %lx below heap start %lx",
base_mfn, xenheap_mfn_start);
end_mfn = base_mfn + nr_mfns;
res = platform->init();
if ( res )
- panic("Unable to initialize the platform\n");
+ panic("Unable to initialize the platform");
}
int __init platform_init_time(void)
}
if ( !paddr )
- early_panic("Not enough memory to relocate Xen\n");
+ early_panic("Not enough memory to relocate Xen");
early_printk("Placing Xen at 0x%"PRIpaddr"-0x%"PRIpaddr"\n",
paddr, paddr + min_size);
/* TODO: Handle non-contiguous memory bank */
if ( !early_info.mem.nr_banks )
- early_panic("No memory bank\n");
+ early_panic("No memory bank");
ram_start = early_info.mem.bank[0].start;
ram_size = early_info.mem.bank[0].size;
ram_end = ram_start + ram_size;
} while ( xenheap_pages > 128<<(20-PAGE_SHIFT) );
if ( ! e )
- early_panic("Not not enough space for xenheap\n");
+ early_panic("Not enough space for xenheap");
domheap_pages = heap_pages - xenheap_pages;
/* Create initial domain 0. */
dom0 = domain_create(0, 0, 0);
if ( IS_ERR(dom0) || (alloc_dom0_vcpu0() == NULL) )
- panic("Error creating domain 0\n");
+ panic("Error creating domain 0");
dom0->is_privileged = 1;
dom0->target = NULL;
if ( construct_dom0(dom0) != 0)
- panic("Could not set up DOM0 guest OS\n");
+ panic("Could not set up DOM0 guest OS");
/* Scrub RAM that is still free and so may go to an unprivileged domain.
XXX too slow in simulator
{
if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, cpu)) ||
!zalloc_cpumask_var(&per_cpu(cpu_core_mask, cpu)) )
- panic("No memory for CPU sibling/core maps\n");
+ panic("No memory for CPU sibling/core maps");
/* A CPU is a sibling with itself and is always on its own core. */
cpumask_set_cpu(cpu, per_cpu(cpu_sibling_mask, cpu));
dev = dt_find_matching_node(NULL, timer_ids);
if ( !dev )
- panic("Unable to find a compatible timer in the device tree\n");
+ panic("Unable to find a compatible timer in the device tree");
dt_device_set_used_by(dev, DOMID_XEN);
{
res = dt_device_get_irq(dev, i, &timer_irq[i]);
if ( res )
- panic("Timer: Unable to retrieve IRQ %u from the device tree\n", i);
+ panic("Timer: Unable to retrieve IRQ %u from the device tree", i);
}
printk("Generic Timer IRQ: phys=%u hyp=%u virt=%u\n",
res = platform_init_time();
if ( res )
- panic("Timer: Cannot initialize platform timer\n");
+ panic("Timer: Cannot initialize platform timer");
/* Check that this CPU supports the Generic Timer interface */
if ( !cpu_has_gentimer )
- panic("CPU does not support the Generic Timer v1 interface.\n");
+ panic("CPU does not support the Generic Timer v1 interface");
res = dt_property_read_u32(dev, "clock-frequency", &rate);
if ( res )
second_in_first ? " during second stage lookup" : "",
fsc_level_str(level));
- panic("Error during Hypervisor-to-physical address translation\n");
+ panic("Error during Hypervisor-to-physical address translation");
}
static void cpsr_switch_mode(struct cpu_user_regs *regs, int mode)
show_execution_state(regs);
break;
default:
- panic("DOM%d: Unhandled debug trap %#x\n", domid, code);
+ panic("DOM%d: Unhandled debug trap %#x", domid, code);
break;
}
}
printk("%s p15, %d, r%d, cr%d, cr%d, %d @ 0x%"PRIregister"\n",
cp32.read ? "mrc" : "mcr",
cp32.op1, cp32.reg, cp32.crn, cp32.crm, cp32.op2, regs->pc);
- panic("unhandled 32-bit CP15 access %#x\n", hsr.bits & HSR_CP32_REGS_MASK);
+ panic("unhandled 32-bit CP15 access %#x", hsr.bits & HSR_CP32_REGS_MASK);
}
advance_pc(regs, hsr);
}
printk("%s p15, %d, r%d, r%d, cr%d @ 0x%"PRIregister"\n",
cp64.read ? "mrrc" : "mcrr",
cp64.op1, cp64.reg1, cp64.reg2, cp64.crm, regs->pc);
- panic("unhandled 64-bit CP15 access %#x\n", hsr.bits & HSR_CP64_REGS_MASK);
+ panic("unhandled 64-bit CP15 access %#x", hsr.bits & HSR_CP64_REGS_MASK);
}
advance_pc(regs, hsr);
}
sysreg.op2,
sysreg.read ? "=>" : "<=",
sysreg.reg, regs->pc);
- panic("unhandled 64-bit sysreg access %#x\n",
+ panic("unhandled 64-bit sysreg access %#x",
hsr.bits & HSR_SYSREG_REGS_MASK);
}
return;
}
panic("x2APIC: already enabled by BIOS, but "
- "iommu_supports_eim failed!\n");
+ "iommu_supports_eim failed");
}
if ( (ioapic_entries = alloc_ioapic_entries()) == NULL )
{
if ( x2apic_enabled )
panic("Interrupt remapping could not be enabled while "
- "x2APIC is already enabled by BIOS!\n");
+ "x2APIC is already enabled by BIOS");
printk(XENLOG_ERR
"Failed to enable Interrupt Remapping: Will not enable x2APIC.\n");
static __init void error(char *x)
{
- panic("%s\n", x);
+ panic("%s", x);
}
static __init int fill_inbuf(void)
if (!cpu_has_amd_erratum(c, AMD_ERRATUM_121))
opt_allow_unsafe = 1;
else if (opt_allow_unsafe < 0)
panic("Xen will not boot on this CPU for security reasons.\n"
"Pass \"allow_unsafe\" if you're trusting all your"
- " (PV) guest kernels.\n");
+ " (PV) guest kernels.");
else if (!opt_allow_unsafe && c == &boot_cpu_data)
* recovery job but to reset the system.
*/
if (atomic_read(&found_error) == 0)
- mc_panic("MCE: No CPU found valid MCE, need reset\n");
+ mc_panic("MCE: No CPU found valid MCE, need reset");
if (!cpumask_empty(&mce_fatal_cpus))
{
char *ebufp, ebuf[96] = "MCE: Fatal error happened on CPUs ";
dprintk(XENLOG_ERR, "MCE delayed action failed\n");
is_mc_panic = 1;
x86_mcinfo_dump(mctelem_dataptr(mctc));
- panic("MCE: Software recovery failed for the UCR\n");
+ panic("MCE: Software recovery failed for the UCR");
break;
case MCER_RECOVERED:
dprintk(XENLOG_INFO, "MCE: Error is successfully recovered\n");
value = (parms.virt_hv_start_low + mask) & ~mask;
BUG_ON(!is_pv_32bit_domain(d));
if ( value > __HYPERVISOR_COMPAT_VIRT_START )
- panic("Domain 0 expects too high a hypervisor start address.\n");
+ panic("Domain 0 expects too high a hypervisor start address");
HYPERVISOR_COMPAT_VIRT_START(d) =
max_t(unsigned int, m2p_compat_vstart, value);
}
count -= PAGE_ALIGN(initrd_len);
order = get_order_from_bytes(count);
if ( (1UL << order) + PFN_UP(initrd_len) > nr_pages )
- panic("Domain 0 allocation is too small for kernel image.\n");
+ panic("Domain 0 allocation is too small for kernel image");
if ( parms.p2m_base != UNSET_ADDR )
{
}
page = alloc_domheap_pages(d, order, 0);
if ( page == NULL )
- panic("Not enough RAM for domain 0 allocation.\n");
+ panic("Not enough RAM for domain 0 allocation");
alloc_spfn = page_to_mfn(page);
alloc_epfn = alloc_spfn + d->tot_pages;
order = get_order_from_pages(count);
page = alloc_domheap_pages(d, order, 0);
if ( !page )
- panic("Not enough RAM for domain 0 initrd.\n");
+ panic("Not enough RAM for domain 0 initrd");
for ( count = -count; order--; )
if ( count & (1UL << order) )
{
{
page = alloc_domheap_page(NULL, 0);
if ( !page )
- panic("Not enough RAM for domain 0 PML4.\n");
+ panic("Not enough RAM for domain 0 PML4");
page->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
l4start = l4tab = page_to_virt(page);
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table;
{
if ( d->tot_pages + ((round_pgup(vphysmap_end) - va)
>> PAGE_SHIFT) + 3 > nr_pages )
- panic("Dom0 allocation too small for initial P->M table.\n");
+ panic("Dom0 allocation too small for initial P->M table");
if ( l1tab )
{
va &= PAGE_MASK;
}
if ( !page )
- panic("Not enough RAM for DOM0 P->M table.\n");
+ panic("Not enough RAM for DOM0 P->M table");
}
if ( l1tab )
while ( pfn < nr_pages )
{
if ( (page = alloc_chunk(d, nr_pages - d->tot_pages)) == NULL )
- panic("Not enough RAM for DOM0 reservation.\n");
+ panic("Not enough RAM for DOM0 reservation");
while ( pfn < d->tot_pages )
{
mfn = page_to_mfn(page);
printk("Dom0 runs in ring 0 (supervisor mode)\n");
if ( !test_bit(XENFEAT_supervisor_mode_kernel,
parms.f_supported) )
- panic("Dom0 does not support supervisor-mode execution\n");
+ panic("Dom0 does not support supervisor-mode execution");
}
else
{
if ( test_bit(XENFEAT_supervisor_mode_kernel, parms.f_required) )
- panic("Dom0 requires supervisor-mode execution\n");
+ panic("Dom0 requires supervisor-mode execution");
}
rc = 0;
/* Guest already enabled an interrupt window. */
return;
default:
- panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x\n",
+ panic("%s: nestedsvm_vcpu_interrupt can't handle value %#x",
__func__, rc);
}
}
if (!physid_isset(i, phys_id_present_map))
break;
if (i >= get_physical_broadcast())
- panic("Max APIC ID exceeded!\n");
+ panic("Max APIC ID exceeded");
printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
i);
mp_ioapics[apic].mpc_apicid = i;
}
if (i == get_physical_broadcast())
- panic("Max apic_id exceeded!\n");
+ panic("Max apic_id exceeded");
printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
"trying %d\n", ioapic, apic_id, i);
l->unlock_level = __get_lock_level();
}
else if ( (unlikely(!rec)) )
- panic("mm lock already held by %s\n", l->locker_function);
+ panic("mm lock already held by %s", l->locker_function);
__set_lock_level(level);
}
if (nr_ioapics >= MAX_IO_APICS) {
printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
MAX_IO_APICS, nr_ioapics);
- panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
+ panic("Recompile kernel with bigger MAX_IO_APICS");
}
if (!m->mpc_apicaddr) {
printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
if (++mp_irq_entries == MAX_IRQ_SOURCES)
- panic("Max # of irq sources exceeded!!\n");
+ panic("Max # of irq sources exceeded");
}
static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
if (nr_ioapics >= MAX_IO_APICS) {
printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
"(found %d)\n", MAX_IO_APICS, nr_ioapics);
- panic("Recompile kernel with bigger MAX_IO_APICS!\n");
+ panic("Recompile kernel with bigger MAX_IO_APICS");
}
if (!address) {
printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
mp_irqs[mp_irq_entries] = intsrc;
if (++mp_irq_entries == MAX_IRQ_SOURCES)
- panic("Max # of irq sources exceeded!\n");
+ panic("Max # of irq sources exceeded");
return;
}
mp_irqs[mp_irq_entries] = intsrc;
if (++mp_irq_entries == MAX_IRQ_SOURCES)
- panic("Max # of irq sources exceeded!\n");
+ panic("Max # of irq sources exceeded");
}
}
watchdog_setup();
if ( !tboot_protect_mem_regions() )
- panic("Could not protect TXT memory regions\n");
+ panic("Could not protect TXT memory regions");
/* Create initial domain 0. */
dom0 = domain_create(0, DOMCRF_s3_integrity, 0);
if ( IS_ERR(dom0) || (alloc_dom0_vcpu0() == NULL) )
- panic("Error creating domain 0\n");
+ panic("Error creating domain 0");
dom0->is_privileged = 1;
dom0->target = NULL;
(initrdidx > 0) && (initrdidx < mbi->mods_count)
? mod + initrdidx : NULL,
bootstrap_map, cmdline) != 0)
- panic("Could not set up DOM0 guest OS\n");
+ panic("Could not set up DOM0 guest OS");
/* Scrub RAM that is still free and so may go to an unprivileged domain. */
scrub_heap_pages();
if ( !zalloc_cpumask_var(&per_cpu(cpu_sibling_mask, 0)) ||
!zalloc_cpumask_var(&per_cpu(cpu_core_mask, 0)) )
- panic("No memory for boot CPU sibling/core maps\n");
+ panic("No memory for boot CPU sibling/core maps");
set_cpu_sibling_map(0);
printk("MAC for %s before S3 is: 0x%08"PRIx64"\n", what, orig_mac);
printk("MAC for %s after S3 is: 0x%08"PRIx64"\n", what, resume_mac);
- panic("Memory integrity was lost on resume (%d)\n", error);
+ panic("Memory integrity was lost on resume (%d)", error);
}
int tboot_wake_ap(int apicid, unsigned long sipi_vec)
}
if ( unlikely(acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_CMOS_RTC) )
- panic("System without CMOS RTC must be booted from EFI\n");
+ panic("System without CMOS RTC must be booted from EFI");
spin_lock_irqsave(&rtc_lock, flags);
}
panic("FATAL TRAP: vector = %d (%s)\n"
- "[error_code=%04x] %s\n",
+ "[error_code=%04x] %s",
trapnr, trapstr(trapnr), regs->error_code,
(regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
}
show_execution_state(regs);
panic("FATAL TRAP: vector = %d (%s)\n"
- "[error_code=%04x]\n",
+ "[error_code=%04x]",
trapnr, trapstr(trapnr), regs->error_code);
}
printk("Xen BUG at %s%s:%d\n", prefix, filename, lineno);
DEBUGGER_trap_fatal(TRAP_invalid_op, regs);
show_execution_state(regs);
- panic("Xen BUG at %s%s:%d\n", prefix, filename, lineno);
+ panic("Xen BUG at %s%s:%d", prefix, filename, lineno);
case BUGFRAME_assert:
/* ASSERT: decode the predicate string pointer. */
predicate, prefix, filename, lineno);
DEBUGGER_trap_fatal(TRAP_invalid_op, regs);
show_execution_state(regs);
- panic("Assertion '%s' failed at %s%s:%d\n",
+ panic("Assertion '%s' failed at %s%s:%d",
predicate, prefix, filename, lineno);
}
}
DEBUGGER_trap_fatal(TRAP_invalid_op, regs);
show_execution_state(regs);
- panic("FATAL TRAP: vector = %d (invalid opcode)\n", TRAP_invalid_op);
+ panic("FATAL TRAP: vector = %d (invalid opcode)", TRAP_invalid_op);
}
void do_int3(struct cpu_user_regs *regs)
show_page_walk(addr);
panic("FATAL PAGE FAULT\n"
"[error_code=%04x]\n"
- "Faulting linear address: %p\n",
+ "Faulting linear address: %p",
error_code, _p(addr));
}
hardware_gp:
show_execution_state(regs);
- panic("GENERAL PROTECTION FAULT\n[error_code=%04x]\n", regs->error_code);
+ panic("GENERAL PROTECTION FAULT\n[error_code=%04x]", regs->error_code);
}
static DEFINE_PER_CPU(struct softirq_trap, softirq_trap);
return;
nomem:
- panic("Not enough memory for m2p table\n");
+ panic("Not enough memory for m2p table");
}
void __init zap_low_mappings(void)
_show_registers(regs, crs, CTXT_hypervisor, NULL);
show_stack_overflow(cpu, regs);
- panic("DOUBLE FAULT -- system shutdown\n");
+ panic("DOUBLE FAULT -- system shutdown");
}
void toggle_guest_mode(struct vcpu *v)
static void __init boot_bug(int line)
{
- panic("Boot BUG at %s:%d\n", __FILE__, line);
+ panic("Boot BUG at %s:%d", __FILE__, line);
}
#define BOOT_BUG_ON(p) if ( p ) boot_bug(__LINE__);
printk("Using scheduler: %s (%s)\n", ops.name, ops.opt_name);
if ( SCHED_OP(&ops, init) )
- panic("scheduler returned error on init\n");
+ panic("scheduler returned error on init");
if ( sched_ratelimit_us &&
(sched_ratelimit_us > XEN_SYSCTL_SCHED_RATELIMIT_MAX
console_start_sync();
printk("\n****************************************\n");
printk("Panic on CPU %d:\n", smp_processor_id());
- printk("%s", buf);
+ printk("%s\n", buf);
printk("****************************************\n\n");
if ( opt_noreboot )
printk("Manual reset required ('noreboot' specified)\n");
console_start_sync();
printk("Xen BUG at %s:%d\n", file, line);
dump_execution_state();
- panic("Xen BUG at %s:%d\n", file, line);
+ panic("Xen BUG at %s:%d", file, line);
for ( ; ; ) ;
}
if ( (force_iommu && !iommu_enabled) ||
(force_intremap && !iommu_intremap) )
- panic("Couldn't enable %s and iommu=required/force\n",
+ panic("Couldn't enable %s and iommu=required/force",
!iommu_enabled ? "IOMMU" : "Interrupt Remapping");
if ( !iommu_enabled )
{
radix_tree_init(&pci_segments);
if ( !alloc_pseg(0) )
- panic("Could not initialize PCI segment 0\n");
+ panic("Could not initialize PCI segment 0");
}
int __init pci_add_segment(u16 seg)
break; \
if ( NOW() > start_time + DMAR_OPERATION_TIMEOUT ) { \
if ( !kexecing ) \
- panic("%s:%d:%s: DMAR hardware is malfunctional\n",\
+ panic("%s:%d:%s: DMAR hardware is malfunctional",\
__FILE__, __LINE__, __func__); \
else \
break; \
if ( is_igd_drhd(drhd) && !is_igd_vt_enabled_quirk() )
{
if ( force_iommu )
- panic("BIOS did not enable IGD for VT properly, crash Xen for security purpose!\n");
+ panic("BIOS did not enable IGD for VT properly, crash Xen for security purpose");
else
{
dprintk(XENLOG_WARNING VTDPREFIX,
return;
if ( init_vtd_hw() != 0 && force_iommu )
- panic("IOMMU setup failed, crash Xen for security purpose!\n");
+ panic("IOMMU setup failed, crash Xen for security purpose");
for_each_drhd_unit ( drhd )
{
if ( NOW() > (start_time + DMAR_OPERATION_TIMEOUT) )
{
print_qi_regs(iommu);
- panic("queue invalidate wait descriptor was not executed\n");
+ panic("queue invalidate wait descriptor was not executed");
}
cpu_relax();
}
original_ops = xsm_ops;
if ( register_xsm(&flask_ops) )
- panic("Flask: Unable to register with XSM.\n");
+ panic("Flask: Unable to register with XSM");
ret = security_load_policy(policy_buffer, policy_size);